In [1]:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#   LEARN FCN00
#

from __future__ import print_function
import argparse
import os

import numpy as np
import pickle
from keras import backend as K
from keras.callbacks import ModelCheckpoint
from keras.models import Model
from keras.layers import Input
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Concatenate, AveragePooling2D
from keras.layers import merge
from keras.optimizers import Adam, SGD, RMSprop
from keras.preprocessing.image import list_pictures, array_to_img

from image_ext import list_pictures_in_multidir, load_imgs_asarray, img_dice_coeff

# MAXPOOLING
#from create_fcn import create_fcn01, create_fcn02, create_fcn00
# AVERAGE POOLING
from create_fcn_avpool import create_fcn01,create_fcn00

np.random.seed(2016)
/home/nakazawa_atsushi/anaconda3/envs/py3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
Using TensorFlow backend.
/home/nakazawa_atsushi/anaconda3/envs/py3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6
  return f(*args, **kwds)
In [2]:
def dice_coef(y_true, y_pred, smooth=1.0):
    """Soerensen-Dice coefficient between two masks, computed over all pixels.

    Args:
        y_true: ground-truth tensor (any shape; flattened internally).
        y_pred: predicted tensor, same shape as y_true.
        smooth: additive smoothing term that avoids division by zero when
            both masks are empty. Defaults to 1.0, matching the original
            hard-coded constant, so existing callers are unaffected.

    Returns:
        Scalar tensor in (0, 1]; 1.0 means perfect overlap.
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    intersection = K.sum(y_true * y_pred)
    return (2. * intersection + smooth) / (K.sum(y_true) + K.sum(y_pred) + smooth)

def dice_coef_loss(y_true, y_pred):
    """Loss wrapper: negate the Dice coefficient so that minimizing the
    loss maximizes the overlap between prediction and ground truth."""
    return -1.0 * dice_coef(y_true, y_pred)
In [3]:
def load_fnames(paths):
    """Read a newline-separated list of file names from a text file.

    Args:
        paths: path to the list file (one file name per line).

    Returns:
        List of file names with trailing blank lines removed.

    Notes:
        The original implementation split on '\\n' and unconditionally
        deleted the last element, which silently dropped a real final
        entry whenever the file did not end with a newline. splitlines()
        handles both cases correctly.
    """
    # Context manager guarantees the file is closed even if read() raises.
    with open(paths) as f:
        lines = f.read().splitlines()
    # Drop any trailing empty entries produced by blank lines at EOF.
    while lines and lines[-1] == '':
        lines.pop()
    return lines
In [4]:
def make_fnames(fnames, fpath, fpath_mask, mask_ext):
    """Build parallel lists of image and mask file paths.

    Args:
        fnames: bare file names (e.g. the list returned by load_fnames).
        fpath: directory holding the input images.
        fpath_mask: directory holding the ground-truth masks.
        mask_ext: prefix prepended to each mask file name (may be '').

    Returns:
        [image_paths, mask_paths] -- two lists aligned index-by-index.
    """
    fnames_img = [fpath + '/' + name for name in fnames]
    fnames_mask = [fpath_mask + '/' + mask_ext + name for name in fnames]
    return [fnames_img, fnames_mask]
In [5]:
#
#  MAIN STARTS FROM HERE
#
if __name__ == '__main__':

    # Input resolution fed to the FCNs (height, width).
    target_size = (224, 224)
    dpath_this = './'
    # Checkpoint directory for this run (fcn00, average pooling, augmented data).
    dname_checkpoints = 'checkpoints_fcn00_avpool.augumented'
    # Checkpoints of a previously trained fcn01, used for weight transfer below.
    dname_checkpoints_fcn01 = 'checkpoints_fcn01_avpool'
    dname_outputs = 'outputs'
    fname_architecture = 'architecture.json'
    fname_weights = "model_weights_{epoch:02d}.h5"
    fname_stats = 'stats01.npz'
    # Images are stored channels-first: (channels, height, width).
    dim_ordering = 'channels_first'
    fname_history = "history.pkl"

    # Definition of mode: LEARN or TEST or SHOW_HISTORY
    #mode = "LEARN"
    #mode = "SHOW_HISTORY"
    #mode = "TEST"

    # Build both models (fcn01 is only needed when transferring weights).
    print('creating model fcn00 and fcn01...')
    #model_fcn02 = create_fcn02(target_size)
    model_fcn01 = create_fcn01(target_size)
    model_fcn00 = create_fcn00(target_size)

    # exist_ok=True replaces the racy `os.path.exists(...) == 0` + mkdir pair.
    os.makedirs(dname_checkpoints, exist_ok=True)
creating model fcn00 and fcn01...
In [6]:
#
#   LEARNING MODE
#
mode = "LEARN"
if mode == "LEARN":
    # Read the training split: a text file listing one image name per line,
    # then build parallel image/mask path lists.
    # (Earlier dataset locations kept below, commented out, for reference.)
#    fnames = load_fnames('data/list_train_01.txt')
#    [fpaths_xs_train,fpaths_ys_train] = make_fnames(fnames,'data/img','data/mask','OperatorA_')
#    fnames = load_fnames('data.nnlab/list_train_01.txt')
#    fnames = load_fnames('data/list_train_01.txt')
    fnames = load_fnames('data_augumented/list_train_01.txt')
#    [fpaths_xs_train,fpaths_ys_train] = make_fnames(fnames,'data.nnlab/image','data.nnlab/gt','')
    [fpaths_xs_train,fpaths_ys_train] = make_fnames(fnames,'data_augumented/img','data_augumented/mask','')

    # Load images as arrays, resized to target_size, channels-first.
    print('reading training data')
    X_train = load_imgs_asarray(fpaths_xs_train, grayscale=False, target_size=target_size,
                                dim_ordering=dim_ordering)
    # Masks are loaded as single-channel (grayscale) images.
    print('reading traking gt data')
    Y_train = load_imgs_asarray(fpaths_ys_train, grayscale=True, target_size=target_size,
                                dim_ordering=dim_ordering) 

    # Read the validation split the same way.
#    fnames = load_fnames('data/list_valid_01.txt')
#    [fpaths_xs_valid,fpaths_ys_valid] = make_fnames(fnames,'data/img','data/mask','OperatorA_')
    fnames = load_fnames('data_augumented/list_valid_01.txt')
    [fpaths_xs_valid,fpaths_ys_valid] = make_fnames(fnames,'data_augumented/img','data_augumented/mask','')
    
    print('reading validation data')
    X_valid = load_imgs_asarray(fpaths_xs_valid, grayscale=False, target_size=target_size,
                                dim_ordering=dim_ordering)
    Y_valid = load_imgs_asarray(fpaths_ys_valid, grayscale=True, target_size=target_size,
                                dim_ordering=dim_ordering)     

    print('==> ' + str(len(X_train)) + ' training images loaded')
    print('==> ' + str(len(Y_train)) + ' training masks loaded')
    print('==> ' + str(len(X_valid)) + ' validation images loaded')
    print('==> ' + str(len(Y_valid)) + ' validation masks loaded')

    # Preprocessing: per-channel mean/std over the entire training set
    # (axes 0, 2, 3 = samples, height, width; leaves one value per channel).
    print('computing mean and standard deviation...')
    mean = np.mean(X_train, axis=(0, 2, 3))
    std = np.std(X_train, axis=(0, 2, 3))
    print('==> mean: ' + str(mean))
    print('==> std : ' + str(std))
reading training data
reading traking gt data
reading validation data
==> 7800 training images loaded
==> 7800 training masks loaded
==> 1196 validation images loaded
==> 1196 validation masks loaded
computing mean and standard deviation...
==> mean: [125.60018   90.205666  77.57043 ]
==> std : [61.01421  47.890713 51.63054 ]
In [7]:
    # Save mean/std so the TEST phase can apply the same normalization later.
    print('saving mean and standard deviation to ' + fname_stats + '...')
    stats = {'mean': mean, 'std': std}
    np.savez(dname_checkpoints + '/' + fname_stats, **stats)
    print('==> done')

    # Per-channel standardization of the inputs; masks are scaled from
    # [0, 255] down to [0, 1] so they match the sigmoid output range.
    print('globally normalizing data...')
    for i in range(3):
        X_train[:, i] = (X_train[:, i] - mean[i]) / std[i]
        X_valid[:, i] = (X_valid[:, i] - mean[i]) / std[i]
    Y_train /= 255
    Y_valid /= 255
    print('==> done')
saving mean and standard deviation to stats01.npz...
==> done
globally normalizing data...
==> done
In [8]:
    # Set to 1 to initialize fcn00's shared layers from a trained fcn01.
    init_from_fcn01 = 0

    if init_from_fcn01 == 1:
        # Load the pretrained fcn01 weights from its checkpoint directory.
        epoch = 100
        fname_weights = 'model_weights_%02d.h5'%(epoch)
        fpath_weights_fcn01 = os.path.join(dname_checkpoints_fcn01, fname_weights)
        model_fcn01.load_weights(fpath_weights_fcn01)
        #print('==> done')

        # Layers shared between fcn01 and fcn00 whose weights are copied.
        # NOTE: the original code first assigned the full fcn01 layer list
        # (conv1_1..conv5_2, up1_1..up4_2, conv_fin) and immediately
        # overwrote it with this subset; the dead assignment is removed.
        layer_names = ['conv1_1','conv1_2','conv2_1','conv2_2',
                    'up1_1', 'up1_2', 'up2_1', 'up2_2', 'conv_fin']

        print('copying layer weights')
        for name in layer_names:
            print(name)
            # Copy weights layer-by-layer and keep them trainable (fine-tuning).
            model_fcn00.get_layer(name).set_weights(model_fcn01.get_layer(name).get_weights())
            model_fcn00.get_layer(name).trainable = True
In [9]:
    # Define loss and optimizer; Adam with a small LR is the one actually
    # used below (sgd/rmsprop were alternatives tried during development).
    adam = Adam(lr=1e-5)
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.95, nesterov=True)
    #rmsprop = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
    model_fcn00.compile(optimizer=adam, loss=dice_coef_loss, metrics=[dice_coef])

    # Make sure the directory for architecture/weights exists before training.
    dpath_checkpoints = os.path.join(dpath_this, dname_checkpoints)
    if not os.path.isdir(dpath_checkpoints):
        os.mkdir(dpath_checkpoints)

    # Checkpoint callback: save weights after every epoch (not only the best),
    # with the epoch number embedded in the file name.
    fname_weights = "model_weights_{epoch:02d}.h5"
    fpath_weights = os.path.join(dpath_checkpoints, fname_weights)
    checkpointer = ModelCheckpoint(filepath=fpath_weights, save_best_only=False)      
In [10]:
    # Start training. X_train/Y_train are passed directly; the original
    # sliced them with [:,:,:,:], which is a no-op on these 4-D arrays.
    print('start training...')
    history = model_fcn00.fit(X_train, Y_train, batch_size=64, epochs=200, verbose=1,
                  shuffle=True, validation_data=(X_valid, Y_valid), callbacks=[checkpointer])
start training...
Train on 7800 samples, validate on 1196 samples
Epoch 1/200
7800/7800 [==============================] - 353s 45ms/step - loss: -0.0991 - dice_coef: 0.0991 - val_loss: -0.2066 - val_dice_coef: 0.2066
Epoch 2/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.4541 - dice_coef: 0.4541 - val_loss: -0.4807 - val_dice_coef: 0.4807
Epoch 3/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.5364 - dice_coef: 0.5364 - val_loss: -0.5662 - val_dice_coef: 0.5662
Epoch 4/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.6253 - dice_coef: 0.6253 - val_loss: -0.6159 - val_dice_coef: 0.6159
Epoch 5/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.6579 - dice_coef: 0.6579 - val_loss: -0.6611 - val_dice_coef: 0.6611
Epoch 6/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.6710 - dice_coef: 0.6710 - val_loss: -0.6791 - val_dice_coef: 0.6791
Epoch 7/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.6851 - dice_coef: 0.6851 - val_loss: -0.7006 - val_dice_coef: 0.7006
Epoch 8/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.6980 - dice_coef: 0.6980 - val_loss: -0.6913 - val_dice_coef: 0.6913
Epoch 9/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.7076 - dice_coef: 0.7076 - val_loss: -0.7079 - val_dice_coef: 0.7079
Epoch 10/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.7180 - dice_coef: 0.7180 - val_loss: -0.6920 - val_dice_coef: 0.6920
Epoch 11/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.7246 - dice_coef: 0.7246 - val_loss: -0.6907 - val_dice_coef: 0.6907
Epoch 12/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.7342 - dice_coef: 0.7342 - val_loss: -0.7562 - val_dice_coef: 0.7562
Epoch 13/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.7454 - dice_coef: 0.7454 - val_loss: -0.7291 - val_dice_coef: 0.7291
Epoch 14/200
7800/7800 [==============================] - 311s 40ms/step - loss: -0.7561 - dice_coef: 0.7561 - val_loss: -0.7559 - val_dice_coef: 0.7559
Epoch 15/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.7701 - dice_coef: 0.7701 - val_loss: -0.7636 - val_dice_coef: 0.7636
Epoch 16/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.7737 - dice_coef: 0.7737 - val_loss: -0.7346 - val_dice_coef: 0.7346
Epoch 17/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.7889 - dice_coef: 0.7889 - val_loss: -0.7632 - val_dice_coef: 0.7632
Epoch 18/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.7968 - dice_coef: 0.7968 - val_loss: -0.7187 - val_dice_coef: 0.7187
Epoch 19/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8036 - dice_coef: 0.8036 - val_loss: -0.7615 - val_dice_coef: 0.7615
Epoch 20/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8089 - dice_coef: 0.8089 - val_loss: -0.7608 - val_dice_coef: 0.7608
Epoch 21/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8149 - dice_coef: 0.8149 - val_loss: -0.7731 - val_dice_coef: 0.7731
Epoch 22/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8198 - dice_coef: 0.8198 - val_loss: -0.7874 - val_dice_coef: 0.7874
Epoch 23/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8240 - dice_coef: 0.8240 - val_loss: -0.7744 - val_dice_coef: 0.7744
Epoch 24/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8273 - dice_coef: 0.8273 - val_loss: -0.7965 - val_dice_coef: 0.7965
Epoch 25/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8330 - dice_coef: 0.8330 - val_loss: -0.7822 - val_dice_coef: 0.7822
Epoch 26/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8372 - dice_coef: 0.8372 - val_loss: -0.7532 - val_dice_coef: 0.7532
Epoch 27/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8350 - dice_coef: 0.8350 - val_loss: -0.8007 - val_dice_coef: 0.8007
Epoch 28/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8467 - dice_coef: 0.8467 - val_loss: -0.7620 - val_dice_coef: 0.7620
Epoch 29/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8470 - dice_coef: 0.8470 - val_loss: -0.7819 - val_dice_coef: 0.7819
Epoch 30/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8523 - dice_coef: 0.8523 - val_loss: -0.7915 - val_dice_coef: 0.7915
Epoch 31/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8575 - dice_coef: 0.8575 - val_loss: -0.7832 - val_dice_coef: 0.7832
Epoch 32/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.8575 - dice_coef: 0.8575 - val_loss: -0.7892 - val_dice_coef: 0.7892
Epoch 33/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8645 - dice_coef: 0.8645 - val_loss: -0.8005 - val_dice_coef: 0.8005
Epoch 34/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8673 - dice_coef: 0.8673 - val_loss: -0.8017 - val_dice_coef: 0.8017
Epoch 35/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.8668 - dice_coef: 0.8668 - val_loss: -0.7832 - val_dice_coef: 0.7832
Epoch 36/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.8727 - dice_coef: 0.8727 - val_loss: -0.7930 - val_dice_coef: 0.7930
Epoch 37/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.8741 - dice_coef: 0.8741 - val_loss: -0.7969 - val_dice_coef: 0.7969
Epoch 38/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8794 - dice_coef: 0.8794 - val_loss: -0.7985 - val_dice_coef: 0.7985
Epoch 39/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8808 - dice_coef: 0.8808 - val_loss: -0.7935 - val_dice_coef: 0.7935
Epoch 40/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.8837 - dice_coef: 0.8837 - val_loss: -0.8017 - val_dice_coef: 0.8017
Epoch 41/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8884 - dice_coef: 0.8884 - val_loss: -0.8064 - val_dice_coef: 0.8064
Epoch 42/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8934 - dice_coef: 0.8934 - val_loss: -0.7947 - val_dice_coef: 0.7947
Epoch 43/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.8962 - dice_coef: 0.8962 - val_loss: -0.8031 - val_dice_coef: 0.8031
Epoch 44/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.8972 - dice_coef: 0.8972 - val_loss: -0.8074 - val_dice_coef: 0.8074
Epoch 45/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9011 - dice_coef: 0.9011 - val_loss: -0.8066 - val_dice_coef: 0.8066
Epoch 46/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9037 - dice_coef: 0.9037 - val_loss: -0.8062 - val_dice_coef: 0.8062
Epoch 47/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9053 - dice_coef: 0.9053 - val_loss: -0.7971 - val_dice_coef: 0.7971
Epoch 48/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9069 - dice_coef: 0.9069 - val_loss: -0.8081 - val_dice_coef: 0.8081
Epoch 49/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9130 - dice_coef: 0.9130 - val_loss: -0.8023 - val_dice_coef: 0.8023
Epoch 50/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9121 - dice_coef: 0.9121 - val_loss: -0.7981 - val_dice_coef: 0.7981
Epoch 51/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9165 - dice_coef: 0.9165 - val_loss: -0.8048 - val_dice_coef: 0.8048
Epoch 52/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9164 - dice_coef: 0.9164 - val_loss: -0.7930 - val_dice_coef: 0.7930
Epoch 53/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9193 - dice_coef: 0.9193 - val_loss: -0.8000 - val_dice_coef: 0.8000
Epoch 54/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9214 - dice_coef: 0.9214 - val_loss: -0.8073 - val_dice_coef: 0.8073
Epoch 55/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9243 - dice_coef: 0.9243 - val_loss: -0.8102 - val_dice_coef: 0.8102
Epoch 56/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9253 - dice_coef: 0.9253 - val_loss: -0.8127 - val_dice_coef: 0.8127
Epoch 57/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9262 - dice_coef: 0.9262 - val_loss: -0.8003 - val_dice_coef: 0.8003
Epoch 58/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9316 - dice_coef: 0.9316 - val_loss: -0.8030 - val_dice_coef: 0.8030
Epoch 59/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9304 - dice_coef: 0.9304 - val_loss: -0.8016 - val_dice_coef: 0.8016
Epoch 60/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9305 - dice_coef: 0.9305 - val_loss: -0.8080 - val_dice_coef: 0.8080
Epoch 61/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9336 - dice_coef: 0.9336 - val_loss: -0.8046 - val_dice_coef: 0.8046
Epoch 62/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9328 - dice_coef: 0.9328 - val_loss: -0.8111 - val_dice_coef: 0.8111
Epoch 63/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9368 - dice_coef: 0.9368 - val_loss: -0.8046 - val_dice_coef: 0.8046
Epoch 64/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9392 - dice_coef: 0.9392 - val_loss: -0.8096 - val_dice_coef: 0.8096
Epoch 65/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9397 - dice_coef: 0.9397 - val_loss: -0.8039 - val_dice_coef: 0.8039
Epoch 66/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9407 - dice_coef: 0.9407 - val_loss: -0.8059 - val_dice_coef: 0.8059
Epoch 67/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9367 - dice_coef: 0.9367 - val_loss: -0.8052 - val_dice_coef: 0.8052
Epoch 68/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9430 - dice_coef: 0.9430 - val_loss: -0.8083 - val_dice_coef: 0.8083
Epoch 69/200
7800/7800 [==============================] - 310s 40ms/step - loss: -0.9413 - dice_coef: 0.9413 - val_loss: -0.8049 - val_dice_coef: 0.8049
Epoch 70/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9437 - dice_coef: 0.9437 - val_loss: -0.8035 - val_dice_coef: 0.8035
Epoch 71/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9479 - dice_coef: 0.9479 - val_loss: -0.8047 - val_dice_coef: 0.8047
Epoch 72/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9455 - dice_coef: 0.9455 - val_loss: -0.8034 - val_dice_coef: 0.8034
Epoch 73/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9472 - dice_coef: 0.9472 - val_loss: -0.8081 - val_dice_coef: 0.8081
Epoch 74/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9466 - dice_coef: 0.9466 - val_loss: -0.8015 - val_dice_coef: 0.8015
Epoch 75/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9493 - dice_coef: 0.9493 - val_loss: -0.8061 - val_dice_coef: 0.8061
Epoch 76/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9473 - dice_coef: 0.9473 - val_loss: -0.8019 - val_dice_coef: 0.8019
Epoch 77/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9489 - dice_coef: 0.9489 - val_loss: -0.8058 - val_dice_coef: 0.8058
Epoch 78/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9506 - dice_coef: 0.9506 - val_loss: -0.8048 - val_dice_coef: 0.8048
Epoch 79/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9487 - dice_coef: 0.9487 - val_loss: -0.8038 - val_dice_coef: 0.8038
Epoch 80/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9524 - dice_coef: 0.9524 - val_loss: -0.8042 - val_dice_coef: 0.8042
Epoch 81/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9519 - dice_coef: 0.9519 - val_loss: -0.8052 - val_dice_coef: 0.8052
Epoch 82/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9537 - dice_coef: 0.9537 - val_loss: -0.8034 - val_dice_coef: 0.8034
Epoch 83/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9538 - dice_coef: 0.9538 - val_loss: -0.8046 - val_dice_coef: 0.8046
Epoch 84/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9550 - dice_coef: 0.9550 - val_loss: -0.8032 - val_dice_coef: 0.8032
Epoch 85/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9542 - dice_coef: 0.9542 - val_loss: -0.8014 - val_dice_coef: 0.8014
Epoch 86/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9583 - dice_coef: 0.9583 - val_loss: -0.8064 - val_dice_coef: 0.8064
Epoch 87/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9576 - dice_coef: 0.9576 - val_loss: -0.8011 - val_dice_coef: 0.8011
Epoch 88/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9575 - dice_coef: 0.9575 - val_loss: -0.8046 - val_dice_coef: 0.8046
Epoch 89/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9558 - dice_coef: 0.9558 - val_loss: -0.8048 - val_dice_coef: 0.8048
Epoch 90/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9559 - dice_coef: 0.9559 - val_loss: -0.8042 - val_dice_coef: 0.8042
Epoch 91/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9580 - dice_coef: 0.9580 - val_loss: -0.8045 - val_dice_coef: 0.8045
Epoch 92/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9584 - dice_coef: 0.9584 - val_loss: -0.8052 - val_dice_coef: 0.8052
Epoch 93/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9569 - dice_coef: 0.9569 - val_loss: -0.8061 - val_dice_coef: 0.8061
Epoch 94/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9598 - dice_coef: 0.9598 - val_loss: -0.8065 - val_dice_coef: 0.8065
Epoch 95/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9589 - dice_coef: 0.9589 - val_loss: -0.8023 - val_dice_coef: 0.8023
Epoch 96/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9593 - dice_coef: 0.9593 - val_loss: -0.8019 - val_dice_coef: 0.8019
Epoch 97/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9591 - dice_coef: 0.9591 - val_loss: -0.8056 - val_dice_coef: 0.8056
Epoch 98/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9601 - dice_coef: 0.9601 - val_loss: -0.8040 - val_dice_coef: 0.8040
Epoch 99/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9611 - dice_coef: 0.9611 - val_loss: -0.8056 - val_dice_coef: 0.8056
Epoch 100/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9606 - dice_coef: 0.9606 - val_loss: -0.8034 - val_dice_coef: 0.8034
Epoch 101/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9613 - dice_coef: 0.9613 - val_loss: -0.8035 - val_dice_coef: 0.8035
Epoch 102/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9607 - dice_coef: 0.9607 - val_loss: -0.8021 - val_dice_coef: 0.8021
Epoch 103/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9622 - dice_coef: 0.9622 - val_loss: -0.8075 - val_dice_coef: 0.8075
Epoch 104/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9623 - dice_coef: 0.9623 - val_loss: -0.8045 - val_dice_coef: 0.8045
Epoch 105/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9647 - dice_coef: 0.9647 - val_loss: -0.8015 - val_dice_coef: 0.8015
Epoch 106/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9629 - dice_coef: 0.9629 - val_loss: -0.8048 - val_dice_coef: 0.8048
Epoch 107/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9647 - dice_coef: 0.9647 - val_loss: -0.8073 - val_dice_coef: 0.8073
Epoch 108/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9639 - dice_coef: 0.9639 - val_loss: -0.8069 - val_dice_coef: 0.8069
Epoch 109/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9631 - dice_coef: 0.9631 - val_loss: -0.8064 - val_dice_coef: 0.8064
Epoch 110/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9629 - dice_coef: 0.9629 - val_loss: -0.8036 - val_dice_coef: 0.8036
Epoch 111/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9654 - dice_coef: 0.9654 - val_loss: -0.8045 - val_dice_coef: 0.8045
Epoch 112/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9645 - dice_coef: 0.9645 - val_loss: -0.8051 - val_dice_coef: 0.8051
Epoch 113/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9647 - dice_coef: 0.9647 - val_loss: -0.8019 - val_dice_coef: 0.8019
Epoch 114/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9648 - dice_coef: 0.9648 - val_loss: -0.8037 - val_dice_coef: 0.8037
Epoch 115/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9652 - dice_coef: 0.9652 - val_loss: -0.8055 - val_dice_coef: 0.8055
Epoch 116/200
7800/7800 [==============================] - 308s 39ms/step - loss: -0.9637 - dice_coef: 0.9637 - val_loss: -0.8026 - val_dice_coef: 0.8026
Epoch 117/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9668 - dice_coef: 0.9668 - val_loss: -0.8059 - val_dice_coef: 0.8059
Epoch 118/200
7800/7800 [==============================] - 308s 39ms/step - loss: -0.9641 - dice_coef: 0.9641 - val_loss: -0.8057 - val_dice_coef: 0.8057
Epoch 119/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9654 - dice_coef: 0.9654 - val_loss: -0.8056 - val_dice_coef: 0.8056
Epoch 120/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9648 - dice_coef: 0.9648 - val_loss: -0.8059 - val_dice_coef: 0.8059
Epoch 121/200
7800/7800 [==============================] - 308s 39ms/step - loss: -0.9674 - dice_coef: 0.9674 - val_loss: -0.8005 - val_dice_coef: 0.8005
Epoch 122/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9651 - dice_coef: 0.9651 - val_loss: -0.8086 - val_dice_coef: 0.8086
Epoch 123/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9654 - dice_coef: 0.9654 - val_loss: -0.8032 - val_dice_coef: 0.8032
Epoch 124/200
7800/7800 [==============================] - 308s 39ms/step - loss: -0.9659 - dice_coef: 0.9659 - val_loss: -0.8046 - val_dice_coef: 0.8046
Epoch 125/200
7800/7800 [==============================] - 308s 39ms/step - loss: -0.9687 - dice_coef: 0.9687 - val_loss: -0.8024 - val_dice_coef: 0.8024
Epoch 126/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9680 - dice_coef: 0.9680 - val_loss: -0.8055 - val_dice_coef: 0.8055
Epoch 127/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9658 - dice_coef: 0.9658 - val_loss: -0.8043 - val_dice_coef: 0.8043
Epoch 128/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9672 - dice_coef: 0.9672 - val_loss: -0.8041 - val_dice_coef: 0.8041
Epoch 129/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9690 - dice_coef: 0.9690 - val_loss: -0.8022 - val_dice_coef: 0.8022
Epoch 130/200
7800/7800 [==============================] - 308s 39ms/step - loss: -0.9673 - dice_coef: 0.9673 - val_loss: -0.8048 - val_dice_coef: 0.8048
Epoch 131/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9688 - dice_coef: 0.9688 - val_loss: -0.8014 - val_dice_coef: 0.8014
Epoch 132/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9716 - dice_coef: 0.9716 - val_loss: -0.8042 - val_dice_coef: 0.8042
Epoch 133/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9682 - dice_coef: 0.9682 - val_loss: -0.8044 - val_dice_coef: 0.8044
Epoch 134/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9699 - dice_coef: 0.9699 - val_loss: -0.8041 - val_dice_coef: 0.8041
Epoch 135/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9685 - dice_coef: 0.9685 - val_loss: -0.8060 - val_dice_coef: 0.8060
Epoch 136/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9708 - dice_coef: 0.9708 - val_loss: -0.8042 - val_dice_coef: 0.8042
Epoch 137/200
7800/7800 [==============================] - 308s 40ms/step - loss: -0.9682 - dice_coef: 0.9682 - val_loss: -0.8029 - val_dice_coef: 0.8029
Epoch 138/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9680 - dice_coef: 0.9680 - val_loss: -0.8050 - val_dice_coef: 0.8050
Epoch 139/200
7800/7800 [==============================] - 309s 40ms/step - loss: -0.9692 - dice_coef: 0.9692 - val_loss: -0.8049 - val_dice_coef: 0.8049
Epoch 140/200
6208/7800 [======================>.......] - ETA: 59s - loss: -0.9707 - dice_coef: 0.9707 
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-10-d069f3c8b433> in <module>()
      2 print('start training...')
      3 history = model_fcn00.fit(X_train[:,:,:,:], Y_train[:,:,:,:], batch_size=64, epochs=200, verbose=1,
----> 4               shuffle=True, validation_data=(X_valid, Y_valid), callbacks=[checkpointer])

~/anaconda3/envs/py3/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
   1655                               initial_epoch=initial_epoch,
   1656                               steps_per_epoch=steps_per_epoch,
-> 1657                               validation_steps=validation_steps)
   1658 
   1659     def evaluate(self, x=None, y=None,

~/anaconda3/envs/py3/lib/python3.6/site-packages/keras/engine/training.py in _fit_loop(self, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)
   1211                     batch_logs['size'] = len(batch_ids)
   1212                     callbacks.on_batch_begin(batch_index, batch_logs)
-> 1213                     outs = f(ins_batch)
   1214                     if not isinstance(outs, list):
   1215                         outs = [outs]

~/anaconda3/envs/py3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
   2355         session = get_session()
   2356         updated = session.run(fetches=fetches, feed_dict=feed_dict,
-> 2357                               **self.session_kwargs)
   2358         return updated[:len(self.outputs)]
   2359 

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    887     try:
    888       result = self._run(None, fetches, feed_dict, options_ptr,
--> 889                          run_metadata_ptr)
    890       if run_metadata:
    891         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
   1118     if final_fetches or final_targets or (handle and feed_dict_tensor):
   1119       results = self._do_run(handle, final_targets, final_fetches,
-> 1120                              feed_dict_tensor, options, run_metadata)
   1121     else:
   1122       results = []

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1315     if handle is None:
   1316       return self._do_call(_run_fn, self._session, feeds, fetches, targets,
-> 1317                            options, run_metadata)
   1318     else:
   1319       return self._do_call(_prun_fn, self._session, handle, feeds, fetches)

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
   1321   def _do_call(self, fn, *args):
   1322     try:
-> 1323       return fn(*args)
   1324     except errors.OpError as e:
   1325       message = compat.as_text(e.message)

~/anaconda3/envs/py3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1300           return tf_session.TF_Run(session, options,
   1301                                    feed_dict, fetch_list, target_list,
-> 1302                                    status, run_metadata)
   1303 
   1304     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 
In [11]:
    # Save the Keras training history dict to disk so it can be re-loaded
    # later for plotting (see the SHOW_HISTORY cell).
    # BUGFIX: the original ended with `f.close` (missing parentheses), so
    # close() was never actually called — the cell's own output showed the
    # unbound method `<function BufferedWriter.close>`. A context manager
    # guarantees the handle is flushed and closed.
    with open(dname_checkpoints + '/' + fname_history, 'wb') as f:
        pickle.dump(history.history, f)
Out[11]:
<function BufferedWriter.close>
In [14]:
#
#  TEST MODE
#
# Load the held-out test set listed in data/list_test_01.txt and
# standardise its colour channels with the per-channel mean/std that
# were computed (and saved to fname_stats) during training.
mode = 'TEST'
if mode == "TEST":
    # Prediction (test) mode
    
    # Read Test Data: file names of the test images
    fnames = load_fnames('data/list_test_01.txt')
    #fnames = load_fnames('data.nnlab/list_test_01.txt')

    # Build full paths for input images and their ground-truth masks
    # ('OperatorA_' is the mask filename prefix).
    [fpaths_xs_test,fpaths_ys_test] = make_fnames(fnames,'data/img','data/mask','OperatorA_')
    #[fpaths_xs_test,fpaths_ys_test] = make_fnames(fnames,'data.nnlab/image','data.nnlab/gt','')

    # Load RGB inputs and grayscale masks as numpy arrays.
    X_test = load_imgs_asarray(fpaths_xs_test, grayscale=False, target_size=target_size,
                                dim_ordering=dim_ordering)
    Y_test = load_imgs_asarray(fpaths_ys_test, grayscale=True, target_size=target_size,
                                dim_ordering=dim_ordering)

    # Load the per-channel mean and standard deviation computed at training time
    print('loading mean and standard deviation from ' + fname_stats + '...')
    stats = np.load(dname_checkpoints + '/' + fname_stats)
    mean = stats['mean']
    std = stats['std']
    print('==> mean: ' + str(mean))
    print('==> std : ' + str(std))

    # Standardise each of the 3 colour channels.
    # NOTE(review): indexing X_test[:, i] assumes a channels-first array
    # layout — confirm against the `dim_ordering` used at load time.
    for i in range(3):
        X_test[:, i] = (X_test[:, i] - mean[i]) / std[i]
    print('==> done')
loading mean and standard deviation from stats01.npz...
==> mean: [125.60018   90.205666  77.57043 ]
==> std : [61.01421  47.890713 51.63054 ]
==> done
In [15]:
    from PIL import Image
    import matplotlib.pyplot as plt

    # Load the trained weights for the chosen epoch
    epoch = 100
    fname_weights = 'model_weights_%02d.h5'%(epoch)
    fpath_weights = os.path.join(dname_checkpoints, fname_weights)
    model_fcn00.load_weights(fpath_weights)
    print('==> done')

    # Run prediction on the whole test set at once
    outputs = model_fcn00.predict(X_test)
    #    outputs = model_fcn02.predict(X_test)

    # Save each network output as a binarised PNG image
    dname_outputs = './outputs/'
    if not os.path.isdir(dname_outputs):
        print('create directory: %s'%(dname_outputs))
        os.mkdir(dname_outputs)

    print('saving outputs as images...')
    # NOTE: the original kept a separate counter `n` that always equalled
    # the loop index — use enumerate's index directly.
    for i, array in enumerate(outputs):
        array = np.where(array > 0.5, 1, 0).astype(np.float32)  # threshold to a binary mask
        img_out = array_to_img(array, dim_ordering)
        # fpath_out = os.path.join(dname_outputs, fnames[i])
        fpath_out = os.path.join(dname_outputs, "%05d.png"%(i))
        img_out.save(fpath_out)

    print('==> done')

    dice_eval = []

    for i in range(len(fpaths_xs_test)):
        # Test input image
        im1 = Image.open(fpaths_xs_test[i]).resize((320,240))
        # Network output saved above
        im2 = Image.open(os.path.join(dname_outputs, "%05d.png"%(i))).resize((320,240))
        # Ground truth mask, as 8-bit grayscale
        im3 = Image.open(fpaths_ys_test[i]).resize((320,240)).convert('L')

        # Overlay for visualisation: prediction in red, ground truth in green.
        # NOTE(review): *255 assumes the GT mask is 0/1 valued; if it is
        # already 0/255 this wraps around in uint8 — confirm mask encoding.
        im2_d = np.zeros((240,320,3), 'uint8')
        im2_d[:,:,0] = np.array(im2)
        im2_d[:,:,1] = np.array(im3)*255
        im2_d[:,:,2] = 0

        # Dice coefficient: 2*|A∩B| / (|A|+|B|), on binarised masks.
        im2a = np.array(im2)
        im2a[im2a > 0] = 1
        im3a = np.array(im3)
        im3a[im3a > 0] = 1

        # BUGFIX: the original used builtin sum(sum(...)) on uint8 arrays;
        # column sums of (im2a + im3a) can reach 2*240=480 and wrap around
        # at 255. ndarray.sum() accumulates in a wide integer dtype.
        overlap = (im2a * im3a).sum()
        total = im2a.sum() + im3a.sum()
        dice = 2*overlap/total
        dice_eval.append(dice)
        print('%d: Dice eval : %f'%(i, dice))

        # Show the input next to the prediction/GT overlay
        plt.imshow(np.hstack((np.array(im1),np.array(im2_d))))
        plt.show()

    print('%d: Dice eval av. : %f'%(epoch,np.mean(np.array(dice_eval))))
==> done
saving outputs as images...
==> done
0: Dice eval : 0.963610
1: Dice eval : 0.969938
2: Dice eval : 0.972410
3: Dice eval : 0.968561
4: Dice eval : 0.962743
5: Dice eval : 0.880466
6: Dice eval : 0.966292
7: Dice eval : 0.945195
8: Dice eval : 0.958333
9: Dice eval : 0.974824
10: Dice eval : 0.960573
11: Dice eval : 0.942149
12: Dice eval : 0.962156
13: Dice eval : 0.964667
14: Dice eval : 0.953297
15: Dice eval : 0.967719
16: Dice eval : 0.941374
17: Dice eval : 0.972678
18: Dice eval : 0.954054
19: Dice eval : 0.951244
20: Dice eval : 0.958855
21: Dice eval : 0.945518
22: Dice eval : 0.954463
23: Dice eval : 0.951220
24: Dice eval : 0.970552
25: Dice eval : 0.974607
26: Dice eval : 0.965691
27: Dice eval : 0.943249
28: Dice eval : 0.948949
29: Dice eval : 0.961202
30: Dice eval : 0.955348
31: Dice eval : 0.966559
32: Dice eval : 0.945000
33: Dice eval : 0.951579
34: Dice eval : 0.964992
35: Dice eval : 0.960802
36: Dice eval : 0.973188
37: Dice eval : 0.976812
38: Dice eval : 0.886256
39: Dice eval : 0.926391
40: Dice eval : 0.958904
41: Dice eval : 0.954407
42: Dice eval : 0.958633
43: Dice eval : 0.933934
44: Dice eval : 0.952030
45: Dice eval : 0.969925
46: Dice eval : 0.959696
47: Dice eval : 0.976114
48: Dice eval : 0.957797
49: Dice eval : 0.927318
50: Dice eval : 0.912322
51: Dice eval : 0.958393
52: Dice eval : 0.951327
53: Dice eval : 0.953092
54: Dice eval : 0.939467
55: Dice eval : 0.967742
56: Dice eval : 0.967225
57: Dice eval : 0.966640
58: Dice eval : 0.974057
59: Dice eval : 0.968720
60: Dice eval : 0.894942
61: Dice eval : 0.943885
62: Dice eval : 0.956316
63: Dice eval : 0.956797
64: Dice eval : 0.970276
65: Dice eval : 0.935567
66: Dice eval : 0.934046
67: Dice eval : 0.962865
68: Dice eval : 0.964455
69: Dice eval : 0.870406
70: Dice eval : 0.966045
71: Dice eval : 0.955854
72: Dice eval : 0.938667
73: Dice eval : 0.929134
74: Dice eval : 0.953596
75: Dice eval : 0.946341
76: Dice eval : 0.934513
77: Dice eval : 0.910314
78: Dice eval : 0.938704
79: Dice eval : 0.866083
80: Dice eval : 0.973555
81: Dice eval : 0.933450
82: Dice eval : 0.895899
83: Dice eval : 0.925329
84: Dice eval : 0.948481
85: Dice eval : 0.942731
86: Dice eval : 0.915183
87: Dice eval : 0.916179
88: Dice eval : 0.961609
89: Dice eval : 0.973009
90: Dice eval : 0.931034
91: Dice eval : 0.970663
92: Dice eval : 0.959796
93: Dice eval : 0.936652
94: Dice eval : 0.957865
95: Dice eval : 0.912621
96: Dice eval : 0.923899
97: Dice eval : 0.966173
98: Dice eval : 0.939799
99: Dice eval : 0.945455
100: Dice eval : 0.916399
101: Dice eval : 0.958963
102: Dice eval : 0.968641
103: Dice eval : 0.972683
104: Dice eval : 0.936061
105: Dice eval : 0.930736
106: Dice eval : 0.920000
107: Dice eval : 0.964043
108: Dice eval : 0.966942
109: Dice eval : 0.940754
110: Dice eval : 0.931915
111: Dice eval : 0.962762
112: Dice eval : 0.965247
113: Dice eval : 0.974665
114: Dice eval : 0.962089
115: Dice eval : 0.930591
116: Dice eval : 0.954733
117: Dice eval : 0.957606
118: Dice eval : 0.946067
119: Dice eval : 0.980874
120: Dice eval : 0.918848
121: Dice eval : 0.932489
122: Dice eval : 0.965174
123: Dice eval : 0.967099
124: Dice eval : 0.981505
125: Dice eval : 0.962413
126: Dice eval : 0.894515
127: Dice eval : 0.954225
128: Dice eval : 0.950820
129: Dice eval : 0.927374
130: Dice eval : 0.974244
131: Dice eval : 0.968188
132: Dice eval : 0.961977
133: Dice eval : 0.951149
134: Dice eval : 0.973180
135: Dice eval : 0.980867
136: Dice eval : 0.972616
137: Dice eval : 0.812749
138: Dice eval : 0.956989
139: Dice eval : 0.927273
140: Dice eval : 0.956032
141: Dice eval : 0.933586
142: Dice eval : 0.908123
143: Dice eval : 0.957597
144: Dice eval : 0.967930
145: Dice eval : 0.971335
146: Dice eval : 0.963566
147: Dice eval : 0.957031
148: Dice eval : 0.938689
149: Dice eval : 0.960526
150: Dice eval : 0.952096
151: Dice eval : 0.942857
152: Dice eval : 0.945973
153: Dice eval : 0.932432
154: Dice eval : 0.710027
155: Dice eval : 0.914742
156: Dice eval : 0.882440
157: Dice eval : 0.608178
158: Dice eval : 0.785447
159: Dice eval : 0.854291
160: Dice eval : 0.934247
161: Dice eval : 0.819398
162: Dice eval : 0.850547
163: Dice eval : 0.877863
164: Dice eval : 0.528684
165: Dice eval : 0.686971
166: Dice eval : 0.940397
167: Dice eval : 0.507519
168: Dice eval : 0.836701
169: Dice eval : 0.767665
170: Dice eval : 0.647826
171: Dice eval : 0.829710
172: Dice eval : 0.668596
173: Dice eval : 0.832421
174: Dice eval : 0.590650
175: Dice eval : 0.608879
176: Dice eval : 0.948428
177: Dice eval : 0.964952
178: Dice eval : 0.961661
179: Dice eval : 0.978499
180: Dice eval : 0.958078
181: Dice eval : 0.897833
182: Dice eval : 0.945541
183: Dice eval : 0.961870
184: Dice eval : 0.960796
185: Dice eval : 0.955141
186: Dice eval : 0.969212
187: Dice eval : 0.959805
188: Dice eval : 0.975824
189: Dice eval : 0.966327
190: Dice eval : 0.975534
191: Dice eval : 0.971751
192: Dice eval : 0.943625
193: Dice eval : 0.954505
194: Dice eval : 0.942880
195: Dice eval : 0.969764
196: Dice eval : 0.972443
197: Dice eval : 0.939366
198: Dice eval : 0.953713
199: Dice eval : 0.986580
200: Dice eval : 0.948886
201: Dice eval : 0.931398
202: Dice eval : 0.969114
203: Dice eval : 0.970528
204: Dice eval : 0.881356
205: Dice eval : 0.939245
206: Dice eval : 0.969085
207: Dice eval : 0.972446
208: Dice eval : 0.951128
209: Dice eval : 0.967742
210: Dice eval : 0.954911
211: Dice eval : 0.972807
212: Dice eval : 0.914729
213: Dice eval : 0.923810
214: Dice eval : 0.975259
215: Dice eval : 0.968370
216: Dice eval : 0.939326
217: Dice eval : 0.923567
218: Dice eval : 0.965718
219: Dice eval : 0.976744
220: Dice eval : 0.961806
221: Dice eval : 0.947522
222: Dice eval : 0.968564
223: Dice eval : 0.980480
224: Dice eval : 0.908696
225: Dice eval : 0.900585
226: Dice eval : 0.964481
227: Dice eval : 0.953533
228: Dice eval : 0.891353
229: Dice eval : 0.942717
230: Dice eval : 0.977562
231: Dice eval : 0.975293
232: Dice eval : 0.912281
233: Dice eval : 0.894737
234: Dice eval : 0.956825
235: Dice eval : 0.967023
236: Dice eval : 0.944321
237: Dice eval : 0.947368
238: Dice eval : 0.974983
239: Dice eval : 0.974962
240: Dice eval : 0.937743
241: Dice eval : 0.963526
242: Dice eval : 0.918580
243: Dice eval : 0.956855
244: Dice eval : 0.943005
245: Dice eval : 0.968230
246: Dice eval : 0.969318
247: Dice eval : 0.977987
248: Dice eval : 0.959381
249: Dice eval : 0.954023
250: Dice eval : 0.946794
251: Dice eval : 0.966453
252: Dice eval : 0.949914
253: Dice eval : 0.941176
254: Dice eval : 0.700637
255: Dice eval : 0.780460
256: Dice eval : 0.818182
257: Dice eval : 0.863124
258: Dice eval : 0.633081
259: Dice eval : 0.870881
260: Dice eval : 0.529032
261: Dice eval : 0.666667
262: Dice eval : 0.958421
263: Dice eval : 0.969860
264: Dice eval : 0.907489
265: Dice eval : 0.944578
266: Dice eval : 0.967742
267: Dice eval : 0.970725
268: Dice eval : 0.962109
269: Dice eval : 0.932134
100: Dice eval av. : 0.928257
In [19]:
#
#   Show History
#
# Re-load the pickled Keras training history (written by the save cell)
# and plot every recorded metric as its own titled figure.
mode = "SHOW_HISTORY"
if mode == "SHOW_HISTORY":
    # load pickle
    print(dname_checkpoints + '/' + fname_history)
    # BUGFIX: the original used pickle.load(open(...)), leaking the file
    # handle; a context manager closes it deterministically.
    with open(dname_checkpoints + '/' + fname_history, 'rb') as f:
        history = pickle.load(f)

    # One figure per metric (loss, dice_coef, val_* variants, ...)
    for k in history.keys():
        plt.plot(history[k])
        plt.title(k)
        plt.show()
checkpoints_fcn00/history.pkl